import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, UpSampling2D, BatchNormalization, SpatialDropout2D, Input, MaxPool2D, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.applications import vgg16
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.data import Dataset, AUTOTUNE
import tensorflow.strings
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
from glob import glob
from pathlib import Path
import os
2023-04-23 23:44:39.258461: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
PATH_TO_TRAINING = r'/Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training'
PATH_TO_TRAINING_TENSOR = tf.Variable(PATH_TO_TRAINING)
TRAIN_GLAUCOMA_IMAGES = np.array(glob(os.path.join(PATH_TO_TRAINING, 'Images/GLAUCOMA/*')))
TRAIN_NORMAL_IMAGES = np.array(glob(os.path.join(PATH_TO_TRAINING, 'Images/NORMAL/*')))
TRAIN_GLAUCOMA_GT = np.array(glob(PATH_TO_TRAINING + '/GT/GALUCOMA/*'))
TRAIN_NORMAL_GT = np.array(glob(PATH_TO_TRAINING + '/GT/NORMAL/*'))
PATH_TO_TEST = r'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test'
TEST_GLAUCOMA_IMAGES = np.array(glob(os.path.join(PATH_TO_TEST, 'Images/glaucoma/*')))
TEST_NORMAL_IMAGES = np.array(glob(os.path.join(PATH_TO_TEST, 'Images/normal/*')))
SEED = 2
EPSILON = 10 ** -4
STATELESS_RNG= tf.random.Generator.from_seed(SEED, alg='philox')
IMAGE_SIZE = (224, 224)
CHANNELS = 3
SHUFFLE_BUFFER = len(TRAIN_NORMAL_IMAGES) + len(TRAIN_GLAUCOMA_IMAGES)
BATCH_SIZE = SHUFFLE_BUFFER // 2
early_stopping = EarlyStopping(monitor = 'val_loss', restore_best_weights = True, patience = 5)
2023-04-23 23:44:58.570387: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
Path(TRAIN_GLAUCOMA_IMAGES[0]).stem
TEST_GLAUCOMA_IMAGES
array(['/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_052.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_053.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_079.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_086.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_087.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_050.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_054.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_083.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_082.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_055.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_043.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_056.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_025.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_019.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_030.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_027.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_023.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_020.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_034.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_021.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_039.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_011.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_005.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_006.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_003.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_029.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_001.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_014.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_028.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_067.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_073.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_070.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_059.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_065.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_071.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_074.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_048.png',
'/Users/kunaltamhane/Downloads/archive (2)/Test-20211018T060000Z-001/Test/Images/glaucoma/drishtiGS_077.png'],
dtype='<U106')
def load_image_and_gt(path, test = False):
print(path)
filename = Path(path).stem
if test:
path_to_OD_softmap = os.path.join(PATH_TO_TEST, 'Test_GT', filename, 'SoftMap', filename + '_ODsegSoftmap.png')
path_to_cup_softmap = os.path.join(PATH_TO_TEST, 'Test_GT', filename, 'SoftMap', filename + '_cupsegSoftmap.png')
else:
path_to_OD_softmap = os.path.join(PATH_TO_TRAINING, 'GT', filename, 'SoftMap', filename + '_ODsegSoftmap.png')
path_to_cup_softmap = os.path.join(PATH_TO_TRAINING, 'GT', filename, 'SoftMap', filename + '_cupsegSoftmap.png')
image = tf.io.read_file(path)
image = tf.io.decode_png(image)
od = tf.io.read_file(path_to_OD_softmap)
od = tf.io.decode_png(od)
cup = tf.io.read_file(path_to_cup_softmap)
cup = tf.io.decode_png(cup)
return image, od, cup
def show_image_with_masks(image, od, cup, ax, index, label):
ax[5 * index].imshow(image)
ax[5 * index].set_title(label)
ax[5 * index + 1].imshow(od)
ax[5 * index + 1].set_title('Optical Disk Mask')
ax[5 * index + 2].imshow(od * image)
ax[5 * index + 2].set_title('Image with OD Mask')
ax[5 * index + 3].imshow(cup)
ax[5 * index + 3].set_title('Cup Mask')
ax[5 * index + 4].imshow(cup * image)
ax[5 * index + 4].set_title('Image with CUP Mask')
def load_image_with_masks(path, dice = False, test = False):
filename = tf.strings.split(path, sep = '.')[-2]
filename = tf.strings.split(filename, sep = '/')[-1]
directory_path = tf.strings.split(path, sep = 'Images')[0]
if test:
softmap_path = tf.strings.join([directory_path, 'Test_GT/', filename, '/SoftMap/'])
else:
softmap_path = tf.strings.join([directory_path, 'GT/', filename, '/SoftMap/'])
od_path = tf.strings.join([softmap_path, filename, '_ODsegSoftmap.png'])
cup_path = tf.strings.join([softmap_path, filename, '_cupsegSoftmap.png'])
image = tf.io.read_file(path)
image = tf.io.decode_png(image, channels = 3)
image = tf.image.resize(image, IMAGE_SIZE)
image = image / 255.0
od = tf.io.read_file(od_path)
od = tf.io.decode_png(od, channels = 1)
od = tf.image.resize(od, IMAGE_SIZE)
od = od / 255.0
if dice:
od = tf.where(od >= 0.5, 1.0, 0.0)
cup = tf.io.read_file(cup_path)
cup = tf.io.decode_png(cup, channels = 1)
cup = tf.image.resize(cup, IMAGE_SIZE)
cup = cup / 255.0
if dice:
cup = tf.where(cup >= 0.5, 1.0, 0.0)
return {'image' : image}, {'od' : od, 'cup' : cup}
def augment_images(image, od, cup, single_target = False):
seeds = STATELESS_RNG.make_seeds(2)[0], STATELESS_RNG.make_seeds(2)[0]
def augment(img, seeds):
img = tf.image.stateless_random_flip_left_right(img, seeds[0])
img = tf.image.stateless_random_flip_up_down(img, seeds[1])
img = tf.image.resize_with_crop_or_pad(img, IMAGE_SIZE[0] + 20, IMAGE_SIZE[1] + 20)
img = tf.image.stateless_random_crop(img, size = (*IMAGE_SIZE, img.shape[2]), seed = seeds[0])
return img
image = augment(image, seeds)
od = augment(od, seeds)
cup = augment(cup, seeds)
if single_target:
return image, od
return {'image' : image}, {'od' : od, 'cup' : cup}
image, od, cup = load_image_and_gt(TRAIN_NORMAL_IMAGES[0])
plt.imshow(od)
image.shape
/Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/NORMAL/drishtiGS_046.png
TensorShape([1749, 2049, 3])
rows = 6
cols = 5
fig, axes = plt.subplots(rows, cols, figsize = (15, 15))
axes = axes.flatten()
normal_images = np.random.choice(TRAIN_NORMAL_IMAGES, rows // 2)
glaucoma_images = np.random.choice(TRAIN_GLAUCOMA_IMAGES, rows - (rows // 2))
for idx, file in enumerate(normal_images):
image, od, cup = load_image_and_gt(file)
show_image_with_masks(image, od, cup, axes, index = idx, label = 'Normal :' + os.path.basename(file))
for idx, file in enumerate(glaucoma_images):
image, od, cup = load_image_and_gt(file)
show_image_with_masks(image, od, cup, axes, index = idx + rows // 2, label = 'Glaucoma :' + os.path.basename(file))
plt.tight_layout()
/Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/NORMAL/drishtiGS_017.png /Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/NORMAL/drishtiGS_089.png /Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/NORMAL/drishtiGS_018.png /Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/GLAUCOMA/drishtiGS_010.png /Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/GLAUCOMA/drishtiGS_066.png /Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/GLAUCOMA/drishtiGS_031.png
def get_avgboundary(file):
image = tf.io.read_file(file)
image = tf.io.decode_png(image)
cup_mask = np.zeros(shape = (image.shape[0], image.shape[1]))
od_mask = np.zeros(shape = (image.shape[0], image.shape[1]))
center_mask = np.zeros(shape = (image.shape[0], image.shape[1]))
filename = Path(file).stem
boundary_path = os.path.join(PATH_TO_TRAINING, 'GT', filename, 'AvgBoundary')
cup_file = os.path.join(boundary_path, filename + '_CupAvgBoundary.txt')
od_file = os.path.join(boundary_path, filename + '_ODAvgBoundary.txt')
center_file = os.path.join(boundary_path, filename + '_diskCenter.txt')
count = 0
with open(cup_file) as cf:
for line in cf:
a, b = line.split()
a = int(a)
b = int(b)
cup_mask[a, b] = 255
count += 1
with open(od_file) as of:
for line in of:
a, b = line.split()
a = int(a)
b = int(b)
od_mask[a, b] = 255
with open(center_file) as ctrf:
for line in ctrf:
a, b = line.split()
a = int(a)
b = int(b)
center_mask[a, b] = 255
return od_mask.reshape(image.shape[0], image.shape[1], 1), \
cup_mask.reshape(image.shape[0], image.shape[1], 1), center_mask.reshape(image.shape[0], image.shape[1], 1)
glaucoma_avgboundary_test = TRAIN_GLAUCOMA_IMAGES[3]
od_mask, cup_mask, center_mask = get_avgboundary(glaucoma_avgboundary_test)
image, od, cup = load_image_and_gt(glaucoma_avgboundary_test)
/Users/kunaltamhane/Downloads/archive (2)/Training-20211018T055246Z-001/Training/Images/GLAUCOMA/drishtiGS_044.png
fig, axes = plt.subplots(2, 3, figsize = (20, 20))
axes = axes.flatten()
axes[0].imshow(image)
axes[0].set_title('Test Image')
axes[1].imshow(od)
axes[1].set_title('Optical Disk Mask')
axes[2].imshow(od_mask)
axes[2].set_title('Optical Disk Avg Boundary')
axes[3].imshow(cup)
axes[3].set_title('Cup Mask')
axes[4].imshow(cup_mask)
axes[4].set_title('Cup Avg Boundary')
axes[5].imshow(center_mask)
axes[5].set_title('Center Avg Boundary')
plt.tight_layout()
mask = od_mask[..., 0]
disc_coords = np.where(mask == 1)
x_center = np.mean(disc_coords[1])
y_center = np.mean(disc_coords[0])
print(x_center)
print(y_center)
nan nan
/Users/kunaltamhane/opt/anaconda3/lib/python3.9/site-packages/numpy/core/fromnumeric.py:3440: RuntimeWarning: Mean of empty slice. return _methods._mean(a, axis=axis, dtype=dtype, /Users/kunaltamhane/opt/anaconda3/lib/python3.9/site-packages/numpy/core/_methods.py:189: RuntimeWarning: invalid value encountered in double_scalars ret = ret.dtype.type(ret / rcount)
train_images = np.concatenate([TRAIN_NORMAL_IMAGES, TRAIN_GLAUCOMA_IMAGES])
test_images = np.concatenate([TEST_NORMAL_IMAGES, TEST_GLAUCOMA_IMAGES])
train_ds_dice = Dataset.from_tensor_slices(train_images)\
.shuffle(buffer_size = SHUFFLE_BUFFER)\
.map(lambda x: load_image_with_masks(x, dice = True), num_parallel_calls = AUTOTUNE)\
.map(lambda image, targets: augment_images(image['image'], targets['od'], targets['cup']))\
.batch(batch_size = BATCH_SIZE)\
.prefetch(1)\
.cache()
train_ds_no_dice = Dataset.from_tensor_slices(train_images)\
.shuffle(buffer_size = SHUFFLE_BUFFER)\
.map(lambda x: load_image_with_masks(x, dice = False), num_parallel_calls = AUTOTUNE)\
.map(lambda image, targets: augment_images(image['image'], targets['od'], targets['cup']))\
.batch(batch_size = BATCH_SIZE)\
.prefetch(1)\
.cache()
test_ds_dice = Dataset.from_tensor_slices(test_images)\
.map(lambda x: load_image_with_masks(x, dice = True, test = True), num_parallel_calls = AUTOTUNE)\
.batch(batch_size = BATCH_SIZE)
test_ds_no_dice = Dataset.from_tensor_slices(test_images)\
.map(lambda x: load_image_with_masks(x, dice = False, test = True), num_parallel_calls = AUTOTUNE)\
.batch(batch_size = BATCH_SIZE)
train_ds_no_dice_st = Dataset.from_tensor_slices(train_images)\
.shuffle(buffer_size = SHUFFLE_BUFFER)\
.map(lambda x: load_image_with_masks(x, dice = False), num_parallel_calls = AUTOTUNE)\
.map(lambda image, targets: augment_images(image['image'], targets['od'], targets['cup'], single_target = True))\
.batch(batch_size = BATCH_SIZE)\
.prefetch(1)\
.cache()
test_ds_no_dice_st = Dataset.from_tensor_slices(test_images)\
.map(lambda x: load_image_with_masks(x, dice = False, test = True), num_parallel_calls = AUTOTUNE)\
.map(lambda image, targets: (image['image'], targets['od']))\
.batch(batch_size = BATCH_SIZE)
def upsampling_block(inp, skips, filters, kernels, rates):
activation = 'relu'
x = inp
for f, kernel, skip, rate in zip(filters, kernels, skips, rates):
x = UpSampling2D()(x)
x = Conv2D(filters = f, kernel_size = kernel, strides = 1, activation = activation, padding = 'same')(x)
x = concatenate([x, skip])
x = BatchNormalization()(x)
x = SpatialDropout2D(rate = rate)(x)
return x
def conv_block(inp, filters, kernels, strides, rates):
activation = 'relu'
x = inp
skips = []
for f, kernel, stride, rate in zip(filters, kernels, strides, rates):
if stride == 2:
skips.append(skip)
x = MaxPool2D()(x)
x = Conv2D(filters = f, kernel_size = kernel, strides = 1, activation = activation, padding = 'same')(x)
skip = x
else:
x = Conv2D(filters = f, kernel_size = kernel, strides = 1, activation = activation, padding = 'same')(x)
skip = x
x = BatchNormalization()(x)
x = SpatialDropout2D(rate = rate)(x)
return skips, x
def res_unet(conv_filters, conv_kernels, conv_strides, conv_rates, up_filters, up_kernels, up_rates):
inp = Input(shape = (*IMAGE_SIZE, 3), name = 'image')
x = inp
skips, x = conv_block(x, conv_filters, conv_kernels, conv_strides, conv_rates)
skips = skips[::-1]
od = upsampling_block(x, skips, up_filters, up_kernels, up_rates)
od = Conv2D(filters = 1, kernel_size = 1, strides = 1, padding = 'same', activation = 'sigmoid', name = 'od')(od)
cup = upsampling_block(x, skips, up_filters, up_kernels, up_rates)
cup = Conv2D(filters = 1, kernel_size = 1, strides = 1, padding = 'same', activation = 'sigmoid', name = 'cup')(cup)
return Model(inputs = inp, outputs = [od, cup])
def res_unet_st(conv_filters, conv_kernels, conv_strides, conv_rates, up_filters, up_kernels, up_rates):
inp = Input(shape = (*IMAGE_SIZE, 3), name = 'image')
x = inp
skips, x = conv_block(x, conv_filters, conv_kernels, conv_strides, conv_rates)
skips = skips[::-1]
od = upsampling_block(x, skips, up_filters, up_kernels, up_rates)
od = Conv2D(filters = 1, kernel_size = 1, strides = 1, padding = 'same', activation = 'sigmoid', name = 'od')(od)
return Model(inputs = inp, outputs = od)
def dice(y, y_pred):
epsilon = EPSILON
numerator = 2 * tf.reduce_sum(y * y_pred, axis = [1, 2])
denominator = tf.reduce_sum(y + y_pred, axis = [1, 2])
dice = tf.reduce_mean((numerator + epsilon)/(denominator + epsilon))
return 1 - dice
conv_filters = [8, 16, 32, 64]
conv_kernels = [3, 3, 3, 3]
conv_strides = [1, 2, 2, 2]
conv_rates = [0.8, 0.8, 0.8, 0.8]
upsampling_filters = [32, 16, 8, 8]
upsampling_kernels = [3, 3, 3, 3]
upsampling_rates = [0.5, 0.5, 0.5, 0.5]
mod1 = res_unet(conv_filters, conv_kernels, conv_strides, conv_rates, upsampling_filters, upsampling_kernels, upsampling_rates)
mod1.compile(optimizer = 'Adam', loss = {'od' : dice, 'cup' : dice}, metrics = ['accuracy'])
plot_model(mod1)
You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work.
mod2 = res_unet(conv_filters, conv_kernels, conv_strides, conv_rates, upsampling_filters, upsampling_kernels, upsampling_rates)
mod2.compile(optimizer = 'Adam', loss = {'od' : 'binary_crossentropy', 'cup' : 'binary_crossentropy'}, metrics = ['accuracy'])
hist2 = mod2.fit(train_ds_no_dice, validation_data = test_ds_no_dice, callbacks = [early_stopping], epochs = 100)
Epoch 1/100 2/2 [==============================] - 22s 10s/step - loss: 1.8453 - od_loss: 0.9583 - cup_loss: 0.8870 - od_accuracy: 0.5841 - cup_accuracy: 0.4556 - val_loss: 1.3302 - val_od_loss: 0.6443 - val_cup_loss: 0.6859 - val_od_accuracy: 0.9622 - val_cup_accuracy: 0.9441 Epoch 2/100 2/2 [==============================] - 15s 9s/step - loss: 1.6316 - od_loss: 0.8632 - cup_loss: 0.7684 - od_accuracy: 0.6075 - cup_accuracy: 0.5407 - val_loss: 1.3143 - val_od_loss: 0.6359 - val_cup_loss: 0.6784 - val_od_accuracy: 0.9648 - val_cup_accuracy: 0.9698 Epoch 3/100 2/2 [==============================] - 15s 10s/step - loss: 1.7223 - od_loss: 0.9371 - cup_loss: 0.7852 - od_accuracy: 0.6057 - cup_accuracy: 0.5176 - val_loss: 1.2990 - val_od_loss: 0.6276 - val_cup_loss: 0.6714 - val_od_accuracy: 0.9659 - val_cup_accuracy: 0.9724 Epoch 4/100 2/2 [==============================] - 18s 11s/step - loss: 1.7132 - od_loss: 0.9885 - cup_loss: 0.7247 - od_accuracy: 0.6200 - cup_accuracy: 0.5976 - val_loss: 1.2835 - val_od_loss: 0.6188 - val_cup_loss: 0.6646 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9742 Epoch 5/100 2/2 [==============================] - 16s 10s/step - loss: 1.7692 - od_loss: 1.0294 - cup_loss: 0.7397 - od_accuracy: 0.5548 - cup_accuracy: 0.6024 - val_loss: 1.2713 - val_od_loss: 0.6137 - val_cup_loss: 0.6576 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9758 Epoch 6/100 2/2 [==============================] - 15s 9s/step - loss: 1.6253 - od_loss: 0.9065 - cup_loss: 0.7188 - od_accuracy: 0.6231 - cup_accuracy: 0.6034 - val_loss: 1.2555 - val_od_loss: 0.6049 - val_cup_loss: 0.6506 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9767 Epoch 7/100 2/2 [==============================] - 14s 9s/step - loss: 1.5814 - od_loss: 0.8864 - cup_loss: 0.6951 - od_accuracy: 0.5869 - cup_accuracy: 0.5972 - val_loss: 1.2410 - val_od_loss: 0.5973 - val_cup_loss: 0.6437 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9775 Epoch 8/100 2/2 [==============================] - 15s 10s/step - loss: 1.5089 - od_loss: 0.8107 - cup_loss: 0.6982 - od_accuracy: 0.6446 - cup_accuracy: 0.6323 - val_loss: 1.2304 - val_od_loss: 0.5934 - val_cup_loss: 0.6370 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9780 Epoch 9/100 2/2 [==============================] - 14s 9s/step - loss: 1.6339 - od_loss: 0.9219 - cup_loss: 0.7120 - od_accuracy: 0.6621 - cup_accuracy: 0.6533 - val_loss: 1.2199 - val_od_loss: 0.5891 - val_cup_loss: 0.6309 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9783 Epoch 10/100 2/2 [==============================] - 15s 9s/step - loss: 1.5021 - od_loss: 0.7572 - cup_loss: 0.7449 - od_accuracy: 0.6531 - cup_accuracy: 0.6379 - val_loss: 1.2104 - val_od_loss: 0.5852 - val_cup_loss: 0.6252 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9785 Epoch 11/100 2/2 [==============================] - 15s 10s/step - loss: 1.5980 - od_loss: 0.8914 - cup_loss: 0.7066 - od_accuracy: 0.5938 - cup_accuracy: 0.6641 - val_loss: 1.2005 - val_od_loss: 0.5812 - val_cup_loss: 0.6193 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9785 Epoch 12/100 2/2 [==============================] - 14s 9s/step - loss: 1.5733 - od_loss: 0.8456 - cup_loss: 0.7277 - od_accuracy: 0.6175 - cup_accuracy: 0.6092 - val_loss: 1.1892 - val_od_loss: 0.5751 - val_cup_loss: 0.6141 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9786 Epoch 13/100 2/2 [==============================] - 15s 9s/step - loss: 1.6179 - od_loss: 0.8704 - cup_loss: 0.7474 - od_accuracy: 0.5997 - cup_accuracy: 0.6357 - val_loss: 1.1801 - val_od_loss: 0.5677 - val_cup_loss: 0.6123 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9786 Epoch 14/100 2/2 [==============================] - 15s 9s/step - loss: 1.4762 - od_loss: 0.8090 - cup_loss: 0.6673 - od_accuracy: 0.6615 - cup_accuracy: 0.6636 - val_loss: 1.1678 - val_od_loss: 0.5598 - val_cup_loss: 0.6080 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9787 Epoch 15/100 2/2 [==============================] - 14s 9s/step - loss: 1.5044 - od_loss: 0.7659 - cup_loss: 0.7384 - od_accuracy: 0.6415 - cup_accuracy: 0.6532 - val_loss: 1.1568 - val_od_loss: 0.5509 - val_cup_loss: 0.6059 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9788 Epoch 16/100 2/2 [==============================] - 15s 9s/step - loss: 1.5291 - od_loss: 0.7691 - cup_loss: 0.7600 - od_accuracy: 0.6082 - cup_accuracy: 0.6154 - val_loss: 1.1439 - val_od_loss: 0.5410 - val_cup_loss: 0.6029 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9788 Epoch 17/100 2/2 [==============================] - 15s 10s/step - loss: 1.4847 - od_loss: 0.7905 - cup_loss: 0.6942 - od_accuracy: 0.6299 - cup_accuracy: 0.5996 - val_loss: 1.1330 - val_od_loss: 0.5339 - val_cup_loss: 0.5990 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9788 Epoch 18/100 2/2 [==============================] - 17s 11s/step - loss: 1.5129 - od_loss: 0.7912 - cup_loss: 0.7217 - od_accuracy: 0.6807 - cup_accuracy: 0.6510 - val_loss: 1.1202 - val_od_loss: 0.5288 - val_cup_loss: 0.5914 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9789 Epoch 19/100 2/2 [==============================] - 18s 12s/step - loss: 1.4890 - od_loss: 0.7833 - cup_loss: 0.7058 - od_accuracy: 0.6484 - cup_accuracy: 0.7033 - val_loss: 1.1114 - val_od_loss: 0.5231 - val_cup_loss: 0.5884 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9789 Epoch 20/100 2/2 [==============================] - 19s 13s/step - loss: 1.3908 - od_loss: 0.7425 - cup_loss: 0.6483 - od_accuracy: 0.6911 - cup_accuracy: 0.7332 - val_loss: 1.1003 - val_od_loss: 0.5182 - val_cup_loss: 0.5822 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9789 Epoch 21/100 2/2 [==============================] - 19s 13s/step - loss: 1.4404 - od_loss: 0.7819 - cup_loss: 0.6585 - od_accuracy: 0.7024 - cup_accuracy: 0.7427 - val_loss: 1.0881 - val_od_loss: 0.5111 - val_cup_loss: 0.5770 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 22/100 2/2 [==============================] - 18s 13s/step - loss: 1.3587 - od_loss: 0.6968 - cup_loss: 0.6619 - od_accuracy: 0.7354 - cup_accuracy: 0.7348 - val_loss: 1.0744 - val_od_loss: 0.5032 - val_cup_loss: 0.5712 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 23/100 2/2 [==============================] - 19s 13s/step - loss: 1.4048 - od_loss: 0.7348 - cup_loss: 0.6701 - od_accuracy: 0.6981 - cup_accuracy: 0.7663 - val_loss: 1.0560 - val_od_loss: 0.4916 - val_cup_loss: 0.5644 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 24/100 2/2 [==============================] - 18s 12s/step - loss: 1.3162 - od_loss: 0.6740 - cup_loss: 0.6422 - od_accuracy: 0.7684 - cup_accuracy: 0.8105 - val_loss: 1.0403 - val_od_loss: 0.4821 - val_cup_loss: 0.5582 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 25/100 2/2 [==============================] - 17s 11s/step - loss: 1.4292 - od_loss: 0.7141 - cup_loss: 0.7151 - od_accuracy: 0.7029 - cup_accuracy: 0.7254 - val_loss: 1.0294 - val_od_loss: 0.4748 - val_cup_loss: 0.5545 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 26/100 2/2 [==============================] - 18s 12s/step - loss: 1.3387 - od_loss: 0.7001 - cup_loss: 0.6386 - od_accuracy: 0.7087 - cup_accuracy: 0.7926 - val_loss: 1.0233 - val_od_loss: 0.4712 - val_cup_loss: 0.5521 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 27/100 2/2 [==============================] - 20s 14s/step - loss: 1.3325 - od_loss: 0.7091 - cup_loss: 0.6234 - od_accuracy: 0.7278 - cup_accuracy: 0.7579 - val_loss: 1.0195 - val_od_loss: 0.4688 - val_cup_loss: 0.5507 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 28/100 2/2 [==============================] - 18s 11s/step - loss: 1.1910 - od_loss: 0.5735 - cup_loss: 0.6175 - od_accuracy: 0.8054 - cup_accuracy: 0.7984 - val_loss: 1.0107 - val_od_loss: 0.4661 - val_cup_loss: 0.5446 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 29/100 2/2 [==============================] - 16s 11s/step - loss: 1.3163 - od_loss: 0.7108 - cup_loss: 0.6055 - od_accuracy: 0.7286 - cup_accuracy: 0.7744 - val_loss: 1.0002 - val_od_loss: 0.4619 - val_cup_loss: 0.5383 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 30/100 2/2 [==============================] - 17s 11s/step - loss: 1.2573 - od_loss: 0.6690 - cup_loss: 0.5883 - od_accuracy: 0.7071 - cup_accuracy: 0.7936 - val_loss: 0.9904 - val_od_loss: 0.4578 - val_cup_loss: 0.5326 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 31/100 2/2 [==============================] - 16s 11s/step - loss: 1.3519 - od_loss: 0.6955 - cup_loss: 0.6564 - od_accuracy: 0.7601 - cup_accuracy: 0.7570 - val_loss: 0.9822 - val_od_loss: 0.4531 - val_cup_loss: 0.5292 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 32/100 2/2 [==============================] - 17s 11s/step - loss: 1.2803 - od_loss: 0.6806 - cup_loss: 0.5997 - od_accuracy: 0.7222 - cup_accuracy: 0.8085 - val_loss: 0.9731 - val_od_loss: 0.4477 - val_cup_loss: 0.5253 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 33/100 2/2 [==============================] - 17s 11s/step - loss: 1.3410 - od_loss: 0.7310 - cup_loss: 0.6100 - od_accuracy: 0.7027 - cup_accuracy: 0.8421 - val_loss: 0.9651 - val_od_loss: 0.4434 - val_cup_loss: 0.5217 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 34/100 2/2 [==============================] - 16s 10s/step - loss: 1.2667 - od_loss: 0.6757 - cup_loss: 0.5910 - od_accuracy: 0.7115 - cup_accuracy: 0.8646 - val_loss: 0.9539 - val_od_loss: 0.4358 - val_cup_loss: 0.5181 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 35/100 2/2 [==============================] - 17s 11s/step - loss: 1.2389 - od_loss: 0.6284 - cup_loss: 0.6105 - od_accuracy: 0.7638 - cup_accuracy: 0.8005 - val_loss: 0.9447 - val_od_loss: 0.4299 - val_cup_loss: 0.5148 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 36/100 2/2 [==============================] - 17s 11s/step - loss: 1.1995 - od_loss: 0.6226 - cup_loss: 0.5769 - od_accuracy: 0.8087 - cup_accuracy: 0.8211 - val_loss: 0.9375 - val_od_loss: 0.4252 - val_cup_loss: 0.5123 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 37/100 2/2 [==============================] - 17s 11s/step - loss: 1.2367 - od_loss: 0.6278 - cup_loss: 0.6090 - od_accuracy: 0.8214 - cup_accuracy: 0.8503 - val_loss: 0.9315 - val_od_loss: 0.4213 - val_cup_loss: 0.5102 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 38/100 2/2 [==============================] - 18s 11s/step - loss: 1.2330 - od_loss: 0.6698 - cup_loss: 0.5633 - od_accuracy: 0.7775 - cup_accuracy: 0.8996 - val_loss: 0.9243 - val_od_loss: 0.4162 - val_cup_loss: 0.5080 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 39/100 2/2 [==============================] - 17s 11s/step - loss: 1.1885 - od_loss: 0.6216 - cup_loss: 0.5669 - od_accuracy: 0.8333 - cup_accuracy: 0.8578 - val_loss: 0.9178 - val_od_loss: 0.4122 - val_cup_loss: 0.5056 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 40/100 2/2 [==============================] - 20s 12s/step - loss: 1.1741 - od_loss: 0.6040 - cup_loss: 0.5701 - od_accuracy: 0.8033 - cup_accuracy: 0.8588 - val_loss: 0.9121 - val_od_loss: 0.4086 - val_cup_loss: 0.5035 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 41/100 2/2 [==============================] - 17s 11s/step - loss: 1.2409 - od_loss: 0.6825 - cup_loss: 0.5584 - od_accuracy: 0.7039 - cup_accuracy: 0.8558 - val_loss: 0.9076 - val_od_loss: 0.4055 - val_cup_loss: 0.5021 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 42/100 2/2 [==============================] - 17s 12s/step - loss: 1.1796 - od_loss: 0.6120 - cup_loss: 0.5675 - od_accuracy: 0.8090 - cup_accuracy: 0.8559 - val_loss: 0.9023 - val_od_loss: 0.4014 - val_cup_loss: 0.5009 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 43/100 2/2 [==============================] - 17s 11s/step - loss: 1.1645 - od_loss: 0.6336 - cup_loss: 0.5309 - od_accuracy: 0.7771 - cup_accuracy: 0.8749 - val_loss: 0.8975 - val_od_loss: 0.3977 - val_cup_loss: 0.4998 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 44/100 2/2 [==============================] - 17s 12s/step - loss: 1.2076 - od_loss: 0.6366 - cup_loss: 0.5711 - od_accuracy: 0.8275 - cup_accuracy: 0.8777 - val_loss: 0.8931 - val_od_loss: 0.3947 - val_cup_loss: 0.4984 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 45/100 2/2 [==============================] - 17s 11s/step - loss: 1.1689 - od_loss: 0.6182 - cup_loss: 0.5506 - od_accuracy: 0.8368 - cup_accuracy: 0.8787 - val_loss: 0.8892 - val_od_loss: 0.3925 - val_cup_loss: 0.4967 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 46/100 2/2 [==============================] - 17s 11s/step - loss: 1.1605 - od_loss: 0.6383 - cup_loss: 0.5222 - od_accuracy: 0.8707 - cup_accuracy: 0.9235 - val_loss: 0.8860 - val_od_loss: 0.3911 - val_cup_loss: 0.4948 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 47/100 2/2 [==============================] - 18s 12s/step - loss: 1.1211 - od_loss: 0.6315 - cup_loss: 0.4896 - od_accuracy: 0.8070 - cup_accuracy: 0.9243 - val_loss: 0.8825 - val_od_loss: 0.3901 - val_cup_loss: 0.4924 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 48/100 2/2 [==============================] - 16s 11s/step - loss: 1.0883 - od_loss: 0.5785 - cup_loss: 0.5098 - od_accuracy: 0.8555 - cup_accuracy: 0.9050 - val_loss: 0.8769 - val_od_loss: 0.3885 - val_cup_loss: 0.4883 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 49/100 2/2 [==============================] - 18s 11s/step - loss: 1.0440 - od_loss: 0.5574 - cup_loss: 0.4865 - od_accuracy: 0.9117 - cup_accuracy: 0.9499 - val_loss: 0.8722 - val_od_loss: 0.3875 - val_cup_loss: 0.4847 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 50/100 2/2 [==============================] - 18s 12s/step - loss: 1.0803 - od_loss: 0.5869 - cup_loss: 0.4933 - od_accuracy: 0.8811 - cup_accuracy: 0.9471 - val_loss: 0.8661 - val_od_loss: 0.3860 - val_cup_loss: 0.4801 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 51/100 2/2 [==============================] - 17s 11s/step - loss: 1.1145 - od_loss: 0.6282 - cup_loss: 0.4863 - od_accuracy: 0.8958 - cup_accuracy: 0.9537 - val_loss: 0.8590 - val_od_loss: 0.3832 - val_cup_loss: 0.4758 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 52/100 2/2 [==============================] - 19s 12s/step - loss: 1.0633 - od_loss: 0.5691 - cup_loss: 0.4941 - od_accuracy: 0.9135 - cup_accuracy: 0.9640 - val_loss: 0.8518 - val_od_loss: 0.3803 - val_cup_loss: 0.4715 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 53/100 2/2 [==============================] - 18s 11s/step - loss: 1.0187 - od_loss: 0.5289 - cup_loss: 0.4899 - od_accuracy: 0.9310 - cup_accuracy: 0.9555 - val_loss: 0.8450 - val_od_loss: 0.3780 - val_cup_loss: 0.4670 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 54/100 2/2 [==============================] - 19s 11s/step - loss: 1.0022 - od_loss: 0.5332 - cup_loss: 0.4690 - od_accuracy: 0.9344 - cup_accuracy: 0.9454 - val_loss: 0.8387 - val_od_loss: 0.3762 - val_cup_loss: 0.4625 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 55/100 2/2 [==============================] - 18s 12s/step - loss: 1.0144 - od_loss: 0.5411 - cup_loss: 0.4733 - od_accuracy: 0.8557 - cup_accuracy: 0.9500 - val_loss: 0.8304 - val_od_loss: 0.3739 - val_cup_loss: 0.4565 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 56/100 2/2 [==============================] - 17s 11s/step - loss: 0.9897 - od_loss: 0.5307 - cup_loss: 0.4590 - od_accuracy: 0.9054 - cup_accuracy: 0.9544 - val_loss: 0.8219 - val_od_loss: 0.3715 - val_cup_loss: 0.4504 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 57/100 2/2 [==============================] - 19s 13s/step - loss: 0.9986 - od_loss: 0.5124 - cup_loss: 0.4862 - od_accuracy: 0.9267 - cup_accuracy: 0.9477 - val_loss: 0.8145 - val_od_loss: 0.3689 - val_cup_loss: 0.4456 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 58/100 2/2 [==============================] - 17s 11s/step - loss: 0.9977 - od_loss: 0.5251 - cup_loss: 0.4726 - od_accuracy: 0.8979 - cup_accuracy: 0.9664 - val_loss: 0.8078 - val_od_loss: 0.3662 - val_cup_loss: 0.4416 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 59/100 2/2 [==============================] - 19s 13s/step - loss: 1.0076 - od_loss: 0.5396 - cup_loss: 0.4680 - od_accuracy: 0.9131 - cup_accuracy: 0.9629 - val_loss: 0.8019 - val_od_loss: 0.3633 - val_cup_loss: 0.4386 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 60/100 2/2 [==============================] - 22s 12s/step - loss: 0.9652 - od_loss: 0.5191 - cup_loss: 0.4461 - od_accuracy: 0.9284 - cup_accuracy: 0.9664 - val_loss: 0.7962 - val_od_loss: 0.3605 - val_cup_loss: 0.4357 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 61/100 2/2 [==============================] - 15s 10s/step - loss: 0.9325 - od_loss: 0.4818 - cup_loss: 0.4507 - od_accuracy: 0.9287 - cup_accuracy: 0.9650 - val_loss: 0.7901 - val_od_loss: 0.3577 - val_cup_loss: 0.4324 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 62/100 2/2 [==============================] - 16s 10s/step - loss: 0.9680 - od_loss: 0.5199 - cup_loss: 0.4481 - od_accuracy: 0.9316 - cup_accuracy: 0.9605 - val_loss: 0.7841 - val_od_loss: 0.3554 - val_cup_loss: 0.4287 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 63/100 2/2 [==============================] - 19s 13s/step - loss: 0.9430 - od_loss: 0.5089 - cup_loss: 0.4341 - od_accuracy: 0.9439 - cup_accuracy: 0.9692 - val_loss: 0.7786 - val_od_loss: 0.3541 - val_cup_loss: 0.4245 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 64/100 2/2 [==============================] - 17s 11s/step - loss: 0.8961 - od_loss: 0.4578 - cup_loss: 0.4382 - od_accuracy: 0.9485 - cup_accuracy: 0.9663 - val_loss: 0.7717 - val_od_loss: 0.3521 - val_cup_loss: 0.4196 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 65/100 2/2 [==============================] - 16s 11s/step - loss: 0.9129 - od_loss: 0.4677 - cup_loss: 0.4453 - od_accuracy: 0.9318 - cup_accuracy: 0.9673 - val_loss: 0.7650 - val_od_loss: 0.3501 - val_cup_loss: 0.4149 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 66/100 2/2 [==============================] - 17s 11s/step - loss: 0.9352 - od_loss: 0.5064 - cup_loss: 0.4288 - od_accuracy: 0.9484 - cup_accuracy: 0.9661 - val_loss: 0.7582 - val_od_loss: 0.3476 - val_cup_loss: 0.4106 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 67/100 2/2 [==============================] - 18s 12s/step - loss: 0.8780 - od_loss: 0.4704 - cup_loss: 0.4075 - od_accuracy: 0.9450 - cup_accuracy: 0.9705 - val_loss: 0.7529 - val_od_loss: 0.3459 - val_cup_loss: 0.4071 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 68/100 2/2 [==============================] - 18s 12s/step - loss: 0.8563 - od_loss: 0.4535 - cup_loss: 0.4028 - od_accuracy: 0.9327 - cup_accuracy: 0.9722 - val_loss: 0.7462 - val_od_loss: 0.3430 - val_cup_loss: 0.4032 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 69/100 2/2 [==============================] - 16s 11s/step - loss: 0.8853 - od_loss: 0.4621 - cup_loss: 0.4232 - od_accuracy: 0.9207 - cup_accuracy: 0.9738 - val_loss: 0.7383 - val_od_loss: 0.3397 - val_cup_loss: 0.3985 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 70/100 2/2 [==============================] - 18s 12s/step - loss: 0.8383 - od_loss: 0.4322 - cup_loss: 0.4061 - od_accuracy: 0.9423 - cup_accuracy: 0.9712 - val_loss: 0.7307 - val_od_loss: 0.3368 - val_cup_loss: 0.3939 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 71/100 2/2 [==============================] - 17s 11s/step - loss: 0.8230 - od_loss: 0.4219 - cup_loss: 0.4011 - od_accuracy: 0.9548 - cup_accuracy: 0.9724 - val_loss: 0.7223 - val_od_loss: 0.3335 - val_cup_loss: 0.3888 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 72/100 2/2 [==============================] - 19s 13s/step - loss: 0.8311 - od_loss: 0.4483 - cup_loss: 0.3828 - od_accuracy: 0.9242 - cup_accuracy: 0.9687 - val_loss: 0.7144 - val_od_loss: 0.3302 - val_cup_loss: 0.3843 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 73/100 2/2 [==============================] - 18s 12s/step - loss: 0.8440 - od_loss: 0.4582 - cup_loss: 0.3859 - od_accuracy: 0.9232 - cup_accuracy: 0.9704 - val_loss: 0.7071 - val_od_loss: 0.3275 - val_cup_loss: 0.3796 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 74/100 2/2 [==============================] - 18s 13s/step - loss: 0.8370 - od_loss: 0.4510 - cup_loss: 0.3860 - od_accuracy: 0.9154 - cup_accuracy: 0.9668 - val_loss: 0.6995 - val_od_loss: 0.3246 - val_cup_loss: 0.3749 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 75/100 2/2 [==============================] - 18s 12s/step - loss: 0.8227 - od_loss: 0.4366 - cup_loss: 0.3861 - od_accuracy: 0.9266 - cup_accuracy: 0.9726 - val_loss: 0.6918 - val_od_loss: 0.3213 - val_cup_loss: 0.3705 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 76/100 2/2 [==============================] - 18s 12s/step - loss: 0.8552 - od_loss: 0.4488 - cup_loss: 0.4063 - od_accuracy: 0.9472 - cup_accuracy: 0.9703 - val_loss: 0.6842 - val_od_loss: 0.3182 - val_cup_loss: 0.3661 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 77/100 2/2 [==============================] - 19s 14s/step - loss: 0.7630 - od_loss: 0.3803 - cup_loss: 0.3827 - od_accuracy: 0.9573 - cup_accuracy: 0.9750 - val_loss: 0.6773 - val_od_loss: 0.3155 - val_cup_loss: 0.3618 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 78/100 2/2 [==============================] - 17s 11s/step - loss: 0.7620 - od_loss: 0.4016 - cup_loss: 0.3604 - od_accuracy: 0.9525 - cup_accuracy: 0.9732 - val_loss: 0.6708 - val_od_loss: 0.3133 - val_cup_loss: 0.3576 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 79/100 2/2 [==============================] - 18s 12s/step - loss: 0.7687 - od_loss: 0.3984 - cup_loss: 0.3704 - od_accuracy: 0.9556 - cup_accuracy: 0.9747 - val_loss: 0.6641 - val_od_loss: 0.3107 - val_cup_loss: 0.3534 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 80/100 2/2 [==============================] - 18s 12s/step - loss: 0.8093 - od_loss: 0.4582 - cup_loss: 0.3511 - od_accuracy: 0.9309 - cup_accuracy: 0.9588 - val_loss: 0.6581 - val_od_loss: 0.3083 - val_cup_loss: 0.3498 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 81/100 2/2 [==============================] - 17s 11s/step - loss: 0.7392 - od_loss: 0.3687 - cup_loss: 0.3705 - od_accuracy: 0.9597 - cup_accuracy: 0.9761 - val_loss: 0.6523 - val_od_loss: 0.3063 - val_cup_loss: 0.3460 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 82/100 2/2 [==============================] - 17s 12s/step - loss: 0.7619 - od_loss: 0.3951 - cup_loss: 0.3668 - od_accuracy: 0.9566 - cup_accuracy: 0.9737 - val_loss: 0.6463 - val_od_loss: 0.3042 - val_cup_loss: 0.3420 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 83/100 2/2 [==============================] - 19s 12s/step - loss: 0.7480 - od_loss: 0.3964 - cup_loss: 0.3515 - od_accuracy: 0.9404 - cup_accuracy: 0.9763 - val_loss: 0.6403 - val_od_loss: 0.3021 - val_cup_loss: 0.3381 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 84/100 2/2 [==============================] - 17s 11s/step - loss: 0.7483 - od_loss: 0.3834 - cup_loss: 0.3649 - od_accuracy: 0.9566 - cup_accuracy: 0.9742 - val_loss: 0.6338 - val_od_loss: 0.2998 - val_cup_loss: 0.3340 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 85/100 2/2 [==============================] - 17s 11s/step - loss: 0.7475 - od_loss: 0.3915 - cup_loss: 0.3560 - od_accuracy: 0.9585 - cup_accuracy: 0.9759 - val_loss: 0.6270 - val_od_loss: 0.2975 - val_cup_loss: 0.3295 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 86/100 2/2 [==============================] - 18s 12s/step - loss: 0.7533 - od_loss: 0.3908 - cup_loss: 0.3625 - od_accuracy: 0.9574 - cup_accuracy: 0.9735 - val_loss: 0.6206 - val_od_loss: 0.2952 - val_cup_loss: 0.3254 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 87/100 2/2 [==============================] - 18s 12s/step - loss: 0.7455 - od_loss: 0.3981 - cup_loss: 0.3474 - od_accuracy: 0.9576 - cup_accuracy: 0.9748 - val_loss: 0.6147 - val_od_loss: 0.2929 - val_cup_loss: 0.3218 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 88/100 2/2 [==============================] - 17s 11s/step - loss: 0.6954 - od_loss: 0.3635 - cup_loss: 0.3319 - od_accuracy: 0.9607 - cup_accuracy: 0.9773 - val_loss: 0.6088 - val_od_loss: 0.2905 - val_cup_loss: 0.3184 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 89/100 2/2 [==============================] - 17s 11s/step - loss: 0.6995 - od_loss: 0.3811 - cup_loss: 0.3183 - od_accuracy: 0.9565 - cup_accuracy: 0.9735 - val_loss: 0.6033 - val_od_loss: 0.2880 - val_cup_loss: 0.3153 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 90/100 2/2 [==============================] - 20s 13s/step - loss: 0.7245 - od_loss: 0.3643 - cup_loss: 0.3601 - od_accuracy: 0.9617 - cup_accuracy: 0.9747 - val_loss: 0.5981 - val_od_loss: 0.2860 - val_cup_loss: 0.3120 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 91/100 2/2 [==============================] - 17s 11s/step - loss: 0.6673 - od_loss: 0.3456 - cup_loss: 0.3216 - od_accuracy: 0.9566 - cup_accuracy: 0.9753 - val_loss: 0.5933 - val_od_loss: 0.2843 - val_cup_loss: 0.3089 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 92/100 2/2 [==============================] - 18s 12s/step - loss: 0.6720 - od_loss: 0.3442 - cup_loss: 0.3278 - od_accuracy: 0.9584 - cup_accuracy: 0.9774 - val_loss: 0.5884 - val_od_loss: 0.2827 - val_cup_loss: 0.3057 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 93/100 2/2 [==============================] - 18s 12s/step - loss: 0.6682 - od_loss: 0.3404 - cup_loss: 0.3278 - od_accuracy: 0.9602 - cup_accuracy: 0.9759 - val_loss: 0.5832 - val_od_loss: 0.2809 - val_cup_loss: 0.3023 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 94/100 2/2 [==============================] - 19s 13s/step - loss: 0.6357 - od_loss: 0.3405 - cup_loss: 0.2952 - od_accuracy: 0.9606 - cup_accuracy: 0.9795 - val_loss: 0.5775 - val_od_loss: 0.2789 - val_cup_loss: 0.2986 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 95/100 2/2 [==============================] - 18s 12s/step - loss: 0.6550 - od_loss: 0.3596 - cup_loss: 0.2954 - od_accuracy: 0.9423 - cup_accuracy: 0.9785 - val_loss: 0.5719 - val_od_loss: 0.2769 - val_cup_loss: 0.2950 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 96/100 2/2 [==============================] - 19s 13s/step - loss: 0.6374 - od_loss: 0.3224 - cup_loss: 0.3150 - od_accuracy: 0.9605 - cup_accuracy: 0.9766 - val_loss: 0.5665 - val_od_loss: 0.2752 - val_cup_loss: 0.2914 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 97/100 2/2 [==============================] - 20s 13s/step - loss: 0.6627 - od_loss: 0.3586 - cup_loss: 0.3041 - od_accuracy: 0.9603 - cup_accuracy: 0.9769 - val_loss: 0.5614 - val_od_loss: 0.2736 - val_cup_loss: 0.2878 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 98/100 2/2 [==============================] - 19s 11s/step - loss: 0.6210 - od_loss: 0.3188 - cup_loss: 0.3021 - od_accuracy: 0.9607 - cup_accuracy: 0.9736 - val_loss: 0.5565 - val_od_loss: 0.2719 - val_cup_loss: 0.2846 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 99/100 2/2 [==============================] - 19s 12s/step - loss: 0.6378 - od_loss: 0.3438 - cup_loss: 0.2939 - od_accuracy: 0.9606 - cup_accuracy: 0.9774 - val_loss: 0.5515 - val_od_loss: 0.2700 - val_cup_loss: 0.2815 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 100/100 2/2 [==============================] - 19s 13s/step - loss: 0.6613 - od_loss: 0.3516 - cup_loss: 0.3097 - od_accuracy: 0.9578 - cup_accuracy: 0.9746 - val_loss: 0.5464 - val_od_loss: 0.2679 - val_cup_loss: 0.2785 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790
hist1 = mod1.fit(train_ds_dice, validation_data = test_ds_dice, callbacks = [early_stopping], epochs = 30)
Epoch 1/30 2/2 [==============================] - 42s 20s/step - loss: 1.9295 - od_loss: 0.9674 - cup_loss: 0.9621 - od_accuracy: 0.4908 - cup_accuracy: 0.4233 - val_loss: 1.9090 - val_od_loss: 0.9460 - val_cup_loss: 0.9631 - val_od_accuracy: 0.8323 - val_cup_accuracy: 0.0677 Epoch 2/30 2/2 [==============================] - 18s 11s/step - loss: 1.9247 - od_loss: 0.9683 - cup_loss: 0.9564 - od_accuracy: 0.4114 - cup_accuracy: 0.4135 - val_loss: 1.9066 - val_od_loss: 0.9442 - val_cup_loss: 0.9624 - val_od_accuracy: 0.8181 - val_cup_accuracy: 0.0950 Epoch 3/30 2/2 [==============================] - 17s 11s/step - loss: 1.9127 - od_loss: 0.9555 - cup_loss: 0.9572 - od_accuracy: 0.4289 - cup_accuracy: 0.4142 - val_loss: 1.9041 - val_od_loss: 0.9421 - val_cup_loss: 0.9620 - val_od_accuracy: 0.8167 - val_cup_accuracy: 0.1461 Epoch 4/30 2/2 [==============================] - 16s 11s/step - loss: 1.9025 - od_loss: 0.9539 - cup_loss: 0.9485 - od_accuracy: 0.5165 - cup_accuracy: 0.4293 - val_loss: 1.9021 - val_od_loss: 0.9405 - val_cup_loss: 0.9616 - val_od_accuracy: 0.9044 - val_cup_accuracy: 0.1542 Epoch 5/30 2/2 [==============================] - 21s 15s/step - loss: 1.9022 - od_loss: 0.9517 - cup_loss: 0.9504 - od_accuracy: 0.4867 - cup_accuracy: 0.4416 - val_loss: 1.9003 - val_od_loss: 0.9389 - val_cup_loss: 0.9613 - val_od_accuracy: 0.8329 - val_cup_accuracy: 0.1619 Epoch 6/30 2/2 [==============================] - 26s 15s/step - loss: 1.8986 - od_loss: 0.9468 - cup_loss: 0.9518 - od_accuracy: 0.4414 - cup_accuracy: 0.4426 - val_loss: 1.8986 - val_od_loss: 0.9377 - val_cup_loss: 0.9609 - val_od_accuracy: 0.8508 - val_cup_accuracy: 0.1726 Epoch 7/30 2/2 [==============================] - 41s 30s/step - loss: 1.8947 - od_loss: 0.9468 - cup_loss: 0.9479 - od_accuracy: 0.5371 - cup_accuracy: 0.4203 - val_loss: 1.8971 - val_od_loss: 0.9367 - val_cup_loss: 0.9605 - val_od_accuracy: 0.8836 - val_cup_accuracy: 0.1849 Epoch 8/30 2/2 [==============================] - 38s 20s/step - loss: 1.8736 - od_loss: 0.9340 - cup_loss: 0.9396 - od_accuracy: 0.5523 - cup_accuracy: 0.4471 - val_loss: 1.8953 - val_od_loss: 0.9354 - val_cup_loss: 0.9599 - val_od_accuracy: 0.9160 - val_cup_accuracy: 0.2015 Epoch 9/30 2/2 [==============================] - 27s 18s/step - loss: 1.8590 - od_loss: 0.9331 - cup_loss: 0.9259 - od_accuracy: 0.5156 - cup_accuracy: 0.4442 - val_loss: 1.8937 - val_od_loss: 0.9345 - val_cup_loss: 0.9591 - val_od_accuracy: 0.9482 - val_cup_accuracy: 0.2288 Epoch 10/30 2/2 [==============================] - 26s 17s/step - loss: 1.8846 - od_loss: 0.9457 - cup_loss: 0.9389 - od_accuracy: 0.4643 - cup_accuracy: 0.4785 - val_loss: 1.8918 - val_od_loss: 0.9335 - val_cup_loss: 0.9582 - val_od_accuracy: 0.9614 - val_cup_accuracy: 0.2634 Epoch 11/30 2/2 [==============================] - 19s 11s/step - loss: 1.8596 - od_loss: 0.9305 - cup_loss: 0.9291 - od_accuracy: 0.5214 - cup_accuracy: 0.5345 - val_loss: 1.8899 - val_od_loss: 0.9327 - val_cup_loss: 0.9572 - val_od_accuracy: 0.9706 - val_cup_accuracy: 0.3144 Epoch 12/30 2/2 [==============================] - 16s 11s/step - loss: 1.8680 - od_loss: 0.9398 - cup_loss: 0.9282 - od_accuracy: 0.5576 - cup_accuracy: 0.4999 - val_loss: 1.8879 - val_od_loss: 0.9321 - val_cup_loss: 0.9558 - val_od_accuracy: 0.9748 - val_cup_accuracy: 0.3814 Epoch 13/30 2/2 [==============================] - 17s 11s/step - loss: 1.8649 - od_loss: 0.9344 - cup_loss: 0.9305 - od_accuracy: 0.5677 - cup_accuracy: 0.5578 - val_loss: 1.8859 - val_od_loss: 0.9314 - val_cup_loss: 0.9544 - val_od_accuracy: 0.9762 - val_cup_accuracy: 0.4539 Epoch 14/30 2/2 [==============================] - 16s 10s/step - loss: 1.8475 - od_loss: 0.9282 - cup_loss: 0.9193 - od_accuracy: 0.5763 - cup_accuracy: 0.5181 - val_loss: 1.8837 - val_od_loss: 0.9308 - val_cup_loss: 0.9528 - val_od_accuracy: 0.9770 - val_cup_accuracy: 0.5278 Epoch 15/30 2/2 [==============================] - 16s 10s/step - loss: 1.8501 - od_loss: 0.9238 - cup_loss: 0.9263 - od_accuracy: 0.5603 - cup_accuracy: 0.5333 - val_loss: 1.8810 - val_od_loss: 0.9300 - val_cup_loss: 0.9511 - val_od_accuracy: 0.9763 - val_cup_accuracy: 0.5877 Epoch 16/30 2/2 [==============================] - 16s 10s/step - loss: 1.8500 - od_loss: 0.9269 - cup_loss: 0.9231 - od_accuracy: 0.5806 - cup_accuracy: 0.5502 - val_loss: 1.8781 - val_od_loss: 0.9290 - val_cup_loss: 0.9490 - val_od_accuracy: 0.9760 - val_cup_accuracy: 0.6403 Epoch 17/30 2/2 [==============================] - 15s 10s/step - loss: 1.8306 - od_loss: 0.9206 - cup_loss: 0.9100 - od_accuracy: 0.5356 - cup_accuracy: 0.5437 - val_loss: 1.8749 - val_od_loss: 0.9281 - val_cup_loss: 0.9468 - val_od_accuracy: 0.9753 - val_cup_accuracy: 0.6929 Epoch 18/30 2/2 [==============================] - 15s 10s/step - loss: 1.8313 - od_loss: 0.9123 - cup_loss: 0.9189 - od_accuracy: 0.5962 - cup_accuracy: 0.5358 - val_loss: 1.8714 - val_od_loss: 0.9271 - val_cup_loss: 0.9443 - val_od_accuracy: 0.9758 - val_cup_accuracy: 0.7490 Epoch 19/30 2/2 [==============================] - 17s 10s/step - loss: 1.8609 - od_loss: 0.9322 - cup_loss: 0.9287 - od_accuracy: 0.5690 - cup_accuracy: 0.5648 - val_loss: 1.8673 - val_od_loss: 0.9257 - val_cup_loss: 0.9415 - val_od_accuracy: 0.9758 - val_cup_accuracy: 0.7955 Epoch 20/30 2/2 [==============================] - 16s 11s/step - loss: 1.8390 - od_loss: 0.9151 - cup_loss: 0.9240 - od_accuracy: 0.5441 - cup_accuracy: 0.5914 - val_loss: 1.8624 - val_od_loss: 0.9243 - val_cup_loss: 0.9380 - val_od_accuracy: 0.9752 - val_cup_accuracy: 0.8429 Epoch 21/30 2/2 [==============================] - 17s 11s/step - loss: 1.8024 - od_loss: 0.9037 - cup_loss: 0.8987 - od_accuracy: 0.5954 - cup_accuracy: 0.5979 - val_loss: 1.8564 - val_od_loss: 0.9228 - val_cup_loss: 0.9336 - val_od_accuracy: 0.9762 - val_cup_accuracy: 0.8850 Epoch 22/30 2/2 [==============================] - 17s 11s/step - loss: 1.8352 - od_loss: 0.9222 - cup_loss: 0.9131 - od_accuracy: 0.5969 - cup_accuracy: 0.5795 - val_loss: 1.8508 - val_od_loss: 0.9216 - val_cup_loss: 0.9292 - val_od_accuracy: 0.9769 - val_cup_accuracy: 0.9103 Epoch 23/30 2/2 [==============================] - 16s 10s/step - loss: 1.8198 - od_loss: 0.9091 - cup_loss: 0.9107 - od_accuracy: 0.5799 - cup_accuracy: 0.5677 - val_loss: 1.8455 - val_od_loss: 0.9198 - val_cup_loss: 0.9257 - val_od_accuracy: 0.9763 - val_cup_accuracy: 0.9217 Epoch 24/30 2/2 [==============================] - 16s 10s/step - loss: 1.8130 - od_loss: 0.9078 - cup_loss: 0.9051 - od_accuracy: 0.5542 - cup_accuracy: 0.5892 - val_loss: 1.8405 - val_od_loss: 0.9188 - val_cup_loss: 0.9216 - val_od_accuracy: 0.9776 - val_cup_accuracy: 0.9354 Epoch 25/30 2/2 [==============================] - 16s 11s/step - loss: 1.7987 - od_loss: 0.8930 - cup_loss: 0.9057 - od_accuracy: 0.5694 - cup_accuracy: 0.5530 - val_loss: 1.8351 - val_od_loss: 0.9172 - val_cup_loss: 0.9179 - val_od_accuracy: 0.9782 - val_cup_accuracy: 0.9444 Epoch 26/30 2/2 [==============================] - 16s 10s/step - loss: 1.8302 - od_loss: 0.9073 - cup_loss: 0.9229 - od_accuracy: 0.5908 - cup_accuracy: 0.5900 - val_loss: 1.8296 - val_od_loss: 0.9159 - val_cup_loss: 0.9138 - val_od_accuracy: 0.9787 - val_cup_accuracy: 0.9518 Epoch 27/30 2/2 [==============================] - 27s 19s/step - loss: 1.7997 - od_loss: 0.8960 - cup_loss: 0.9038 - od_accuracy: 0.6044 - cup_accuracy: 0.5931 - val_loss: 1.8247 - val_od_loss: 0.9148 - val_cup_loss: 0.9099 - val_od_accuracy: 0.9784 - val_cup_accuracy: 0.9572 Epoch 28/30 2/2 [==============================] - 24s 14s/step - loss: 1.7926 - od_loss: 0.8956 - cup_loss: 0.8971 - od_accuracy: 0.5646 - cup_accuracy: 0.6142 - val_loss: 1.8202 - val_od_loss: 0.9140 - val_cup_loss: 0.9061 - val_od_accuracy: 0.9786 - val_cup_accuracy: 0.9623 Epoch 29/30 2/2 [==============================] - 21s 12s/step - loss: 1.7962 - od_loss: 0.9000 - cup_loss: 0.8963 - od_accuracy: 0.5526 - cup_accuracy: 0.6266 - val_loss: 1.8150 - val_od_loss: 0.9135 - val_cup_loss: 0.9015 - val_od_accuracy: 0.9783 - val_cup_accuracy: 0.9684 Epoch 30/30 2/2 [==============================] - 16s 10s/step - loss: 1.8170 - od_loss: 0.9148 - cup_loss: 0.9021 - od_accuracy: 0.6668 - cup_accuracy: 0.5796 - val_loss: 1.8097 - val_od_loss: 0.9127 - val_cup_loss: 0.8971 - val_od_accuracy: 0.9778 - val_cup_accuracy: 0.9724
def plot_loss(epochs, hist, loss):
'''
Shows the loss metrics
Uses a more fair comparison
Between Val and Train
By shifting the train loss
an epoch later
(Train loss is also hurt
by the dropout layers)
'''
plt.plot(epochs + .5, hist.history['loss'], 'r', label = 'Loss')
plt.plot(epochs + .5, hist.history['od_loss'], 'r.-', label = 'OD Loss')
plt.plot(epochs + .5, hist.history['cup_loss'], 'r+', label = 'Cup Loss')
plt.plot(epochs, hist.history['val_loss'], 'b', label = 'Val Loss')
plt.plot(epochs, hist.history['val_od_loss'], 'b.-', label = 'Val OD Loss')
plt.plot(epochs, hist.history['val_cup_loss'], 'b+', label = 'Val Cup Loss')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title(loss)
epochs1 = np.arange(0, len(hist1.history['loss']))
plot_loss(epochs1, hist1, 'Dice')
mod2 = res_unet(conv_filters, conv_kernels, conv_strides, conv_rates, upsampling_filters, upsampling_kernels, upsampling_rates)
mod2.compile(optimizer = 'Adam', loss = {'od' : 'binary_crossentropy', 'cup' : 'binary_crossentropy'}, metrics = ['accuracy'])
hist2 = mod2.fit(train_ds_no_dice, validation_data = test_ds_no_dice, callbacks = [early_stopping], epochs = 30)
Epoch 1/30 2/2 [==============================] - 21s 10s/step - loss: 1.8149 - od_loss: 0.9811 - cup_loss: 0.8338 - od_accuracy: 0.4031 - cup_accuracy: 0.5321 - val_loss: 1.4048 - val_od_loss: 0.6966 - val_cup_loss: 0.7082 - val_od_accuracy: 0.2569 - val_cup_accuracy: 0.1761 Epoch 2/30 2/2 [==============================] - 16s 10s/step - loss: 1.8553 - od_loss: 0.9807 - cup_loss: 0.8746 - od_accuracy: 0.3920 - cup_accuracy: 0.5865 - val_loss: 1.3886 - val_od_loss: 0.6959 - val_cup_loss: 0.6927 - val_od_accuracy: 0.2877 - val_cup_accuracy: 0.5066 Epoch 3/30 2/2 [==============================] - 15s 9s/step - loss: 1.8391 - od_loss: 0.9894 - cup_loss: 0.8497 - od_accuracy: 0.4037 - cup_accuracy: 0.5784 - val_loss: 1.3779 - val_od_loss: 0.6945 - val_cup_loss: 0.6834 - val_od_accuracy: 0.4201 - val_cup_accuracy: 0.8491 Epoch 4/30 2/2 [==============================] - 17s 10s/step - loss: 1.7702 - od_loss: 0.9200 - cup_loss: 0.8502 - od_accuracy: 0.4767 - cup_accuracy: 0.6089 - val_loss: 1.3618 - val_od_loss: 0.6862 - val_cup_loss: 0.6756 - val_od_accuracy: 0.8221 - val_cup_accuracy: 0.9609 Epoch 5/30 2/2 [==============================] - 20s 13s/step - loss: 1.7429 - od_loss: 0.8669 - cup_loss: 0.8760 - od_accuracy: 0.4653 - cup_accuracy: 0.6024 - val_loss: 1.3443 - val_od_loss: 0.6756 - val_cup_loss: 0.6686 - val_od_accuracy: 0.9422 - val_cup_accuracy: 0.9773 Epoch 6/30 2/2 [==============================] - 22s 15s/step - loss: 1.8718 - od_loss: 0.9716 - cup_loss: 0.9002 - od_accuracy: 0.3583 - cup_accuracy: 0.5843 - val_loss: 1.3289 - val_od_loss: 0.6692 - val_cup_loss: 0.6597 - val_od_accuracy: 0.9538 - val_cup_accuracy: 0.9806 Epoch 7/30 2/2 [==============================] - 19s 11s/step - loss: 1.6272 - od_loss: 0.8872 - cup_loss: 0.7400 - od_accuracy: 0.4546 - cup_accuracy: 0.6768 - val_loss: 1.3180 - val_od_loss: 0.6637 - val_cup_loss: 0.6543 - val_od_accuracy: 0.9575 - val_cup_accuracy: 0.9806 Epoch 8/30 2/2 [==============================] - 16s 10s/step - loss: 1.5508 - od_loss: 0.8136 - cup_loss: 0.7371 - od_accuracy: 0.5042 - cup_accuracy: 0.6113 - val_loss: 1.3092 - val_od_loss: 0.6618 - val_cup_loss: 0.6473 - val_od_accuracy: 0.9627 - val_cup_accuracy: 0.9798 Epoch 9/30 2/2 [==============================] - 15s 10s/step - loss: 1.6660 - od_loss: 0.8643 - cup_loss: 0.8017 - od_accuracy: 0.4252 - cup_accuracy: 0.6470 - val_loss: 1.2986 - val_od_loss: 0.6582 - val_cup_loss: 0.6404 - val_od_accuracy: 0.9645 - val_cup_accuracy: 0.9794 Epoch 10/30 2/2 [==============================] - 16s 10s/step - loss: 1.6089 - od_loss: 0.8642 - cup_loss: 0.7447 - od_accuracy: 0.4561 - cup_accuracy: 0.6744 - val_loss: 1.2870 - val_od_loss: 0.6536 - val_cup_loss: 0.6334 - val_od_accuracy: 0.9653 - val_cup_accuracy: 0.9791 Epoch 11/30 2/2 [==============================] - 16s 10s/step - loss: 1.7437 - od_loss: 0.8134 - cup_loss: 0.9302 - od_accuracy: 0.5166 - cup_accuracy: 0.5330 - val_loss: 1.2727 - val_od_loss: 0.6458 - val_cup_loss: 0.6269 - val_od_accuracy: 0.9658 - val_cup_accuracy: 0.9790 Epoch 12/30 2/2 [==============================] - 16s 10s/step - loss: 1.6539 - od_loss: 0.8472 - cup_loss: 0.8067 - od_accuracy: 0.4772 - cup_accuracy: 0.6645 - val_loss: 1.2602 - val_od_loss: 0.6395 - val_cup_loss: 0.6207 - val_od_accuracy: 0.9658 - val_cup_accuracy: 0.9790 Epoch 13/30 2/2 [==============================] - 16s 11s/step - loss: 1.6221 - od_loss: 0.8417 - cup_loss: 0.7804 - od_accuracy: 0.5038 - cup_accuracy: 0.6390 - val_loss: 1.2491 - val_od_loss: 0.6345 - val_cup_loss: 0.6146 - val_od_accuracy: 0.9660 - val_cup_accuracy: 0.9790 Epoch 14/30 2/2 [==============================] - 17s 11s/step - loss: 1.5777 - od_loss: 0.7646 - cup_loss: 0.8130 - od_accuracy: 0.5436 - cup_accuracy: 0.5884 - val_loss: 1.2431 - val_od_loss: 0.6311 - val_cup_loss: 0.6120 - val_od_accuracy: 0.9661 - val_cup_accuracy: 0.9790 Epoch 15/30 2/2 [==============================] - 15s 10s/step - loss: 1.5216 - od_loss: 0.8545 - cup_loss: 0.6671 - od_accuracy: 0.4929 - cup_accuracy: 0.6849 - val_loss: 1.2354 - val_od_loss: 0.6255 - val_cup_loss: 0.6099 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9790 Epoch 16/30 2/2 [==============================] - 15s 10s/step - loss: 1.5147 - od_loss: 0.8061 - cup_loss: 0.7086 - od_accuracy: 0.5063 - cup_accuracy: 0.6925 - val_loss: 1.2263 - val_od_loss: 0.6199 - val_cup_loss: 0.6064 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 17/30 2/2 [==============================] - 15s 10s/step - loss: 1.5082 - od_loss: 0.7503 - cup_loss: 0.7579 - od_accuracy: 0.5434 - cup_accuracy: 0.6692 - val_loss: 1.2199 - val_od_loss: 0.6179 - val_cup_loss: 0.6019 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 18/30 2/2 [==============================] - 15s 10s/step - loss: 1.5082 - od_loss: 0.7357 - cup_loss: 0.7726 - od_accuracy: 0.5724 - cup_accuracy: 0.6783 - val_loss: 1.2112 - val_od_loss: 0.6159 - val_cup_loss: 0.5953 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 19/30 2/2 [==============================] - 16s 10s/step - loss: 1.3797 - od_loss: 0.7154 - cup_loss: 0.6643 - od_accuracy: 0.5610 - cup_accuracy: 0.7273 - val_loss: 1.2005 - val_od_loss: 0.6128 - val_cup_loss: 0.5877 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9790 Epoch 20/30 2/2 [==============================] - 15s 10s/step - loss: 1.4383 - od_loss: 0.7230 - cup_loss: 0.7153 - od_accuracy: 0.5614 - cup_accuracy: 0.7521 - val_loss: 1.1900 - val_od_loss: 0.6116 - val_cup_loss: 0.5784 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9790 Epoch 21/30 2/2 [==============================] - 17s 10s/step - loss: 1.3920 - od_loss: 0.6493 - cup_loss: 0.7427 - od_accuracy: 0.6240 - cup_accuracy: 0.6932 - val_loss: 1.1795 - val_od_loss: 0.6101 - val_cup_loss: 0.5694 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9790 Epoch 22/30 2/2 [==============================] - 16s 10s/step - loss: 1.3682 - od_loss: 0.6744 - cup_loss: 0.6938 - od_accuracy: 0.6161 - cup_accuracy: 0.7557 - val_loss: 1.1700 - val_od_loss: 0.6090 - val_cup_loss: 0.5610 - val_od_accuracy: 0.9662 - val_cup_accuracy: 0.9790 Epoch 23/30 2/2 [==============================] - 18s 10s/step - loss: 1.3523 - od_loss: 0.6851 - cup_loss: 0.6672 - od_accuracy: 0.5399 - cup_accuracy: 0.7186 - val_loss: 1.1605 - val_od_loss: 0.6074 - val_cup_loss: 0.5531 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 24/30 2/2 [==============================] - 17s 11s/step - loss: 1.3447 - od_loss: 0.7020 - cup_loss: 0.6427 - od_accuracy: 0.5918 - cup_accuracy: 0.7702 - val_loss: 1.1524 - val_od_loss: 0.6066 - val_cup_loss: 0.5458 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 25/30 2/2 [==============================] - 16s 10s/step - loss: 1.3336 - od_loss: 0.6764 - cup_loss: 0.6572 - od_accuracy: 0.6148 - cup_accuracy: 0.7958 - val_loss: 1.1414 - val_od_loss: 0.6039 - val_cup_loss: 0.5375 - val_od_accuracy: 0.9663 - val_cup_accuracy: 0.9790 Epoch 26/30 2/2 [==============================] - 18s 11s/step - loss: 1.3675 - od_loss: 0.6934 - cup_loss: 0.6740 - od_accuracy: 0.5641 - cup_accuracy: 0.7541 - val_loss: 1.1304 - val_od_loss: 0.6004 - val_cup_loss: 0.5299 - val_od_accuracy: 0.9664 - val_cup_accuracy: 0.9790 Epoch 27/30 2/2 [==============================] - 20s 14s/step - loss: 1.3586 - od_loss: 0.6987 - cup_loss: 0.6599 - od_accuracy: 0.6402 - cup_accuracy: 0.8030 - val_loss: 1.1197 - val_od_loss: 0.5974 - val_cup_loss: 0.5223 - val_od_accuracy: 0.9667 - val_cup_accuracy: 0.9790 Epoch 28/30 2/2 [==============================] - 18s 12s/step - loss: 1.2884 - od_loss: 0.6159 - cup_loss: 0.6724 - od_accuracy: 0.6933 - cup_accuracy: 0.7362 - val_loss: 1.1062 - val_od_loss: 0.5913 - val_cup_loss: 0.5149 - val_od_accuracy: 0.9670 - val_cup_accuracy: 0.9790 Epoch 29/30 2/2 [==============================] - 19s 12s/step - loss: 1.3281 - od_loss: 0.6573 - cup_loss: 0.6708 - od_accuracy: 0.6337 - cup_accuracy: 0.7226 - val_loss: 1.0934 - val_od_loss: 0.5867 - val_cup_loss: 0.5066 - val_od_accuracy: 0.9668 - val_cup_accuracy: 0.9790 Epoch 30/30 2/2 [==============================] - 18s 13s/step - loss: 1.3601 - od_loss: 0.6808 - cup_loss: 0.6793 - od_accuracy: 0.5885 - cup_accuracy: 0.6908 - val_loss: 1.0827 - val_od_loss: 0.5815 - val_cup_loss: 0.5012 - val_od_accuracy: 0.9666 - val_cup_accuracy: 0.9790
epochs2 = np.arange(0, len(hist2.history['loss']))
plot_loss(epochs2, hist2, 'Binary CrossEntropy')
to_predict = Dataset.from_tensor_slices(test_images)\
.map(lambda x: load_image_with_masks(x, dice = True, test = True), num_parallel_calls = AUTOTUNE)\
.map(lambda image, targets: image['image'])\
.batch(batch_size = BATCH_SIZE)
predict1 = mod1.predict(to_predict)
predict2 = mod2.predict(to_predict)
3/3 [==============================] - 6s 925ms/step 3/3 [==============================] - 5s 661ms/step
test = next(iter(predict1))
test.shape
(51, 224, 224, 1)
def plt_preds(ds, preds, super_title, binarizer_threshold = 0.5):
ds = next(iter(ds))
image, targets = ds
image = image['image']
od = targets['od']
cup = targets['cup']
preds = next(iter(preds))
rows = 5
cols = 7
fig, axes = plt.subplots(rows, cols, figsize = (20, 20))
axes = axes.flatten()
for i in range(rows):
axes[cols * i].imshow(image[i])
axes[cols * i].set_title('Original Image')
axes[cols * i + 1].imshow(od[i])
axes[cols * i + 1].set_title('Original OD Mask')
axes[cols * i + 2].imshow(cup[i])
axes[cols * i + 2].set_title('Original Cup Mask')
axes[cols * i + 3].imshow(preds[i])
axes[cols * i + 3].set_title('Predicted OD Mask')
axes[cols * i + 4].imshow(np.where(preds[i] > binarizer_threshold, 1, 0))
axes[cols * i + 4].set_title('Binarized OD Mask')
axes[cols * i + 5].imshow(preds[26 + i - 1])
axes[cols * i + 5].set_title('Predicted Cup Mask')
axes[cols * i + 6].imshow(np.where(preds[26 + i] >= binarizer_threshold, 1, 0))
axes[cols * i + 6].set_title('Binarized Cup Mask')
plt.suptitle(super_title)
'''def plt_preds_cdr(ds, preds, super_title, binarizer_threshold = 0.5):
#Plots row numbers of images
#Along with the original mask
#And the predicted mask
#Also binarizes the predicted masks
#Calculates and displays the cup-to-disc ratio
ds = next(iter(ds))
image, targets = ds
image = image['image']
od = targets['od']
cup = targets['cup']
preds = next(iter(preds))
rows = 5
cols = 8
fig, axes = plt.subplots(rows, cols, figsize = (20, 20))
axes = axes.flatten()
for i in range(rows):
axes[cols * i].imshow(image[i])
axes[cols * i].set_title('Original Image')
axes[cols * i + 1].imshow(od[i])
axes[cols * i + 1].set_title('Original OD Mask')
axes[cols * i + 2].imshow(cup[i])
axes[cols * i + 2].set_title('Original Cup Mask')
axes[cols * i + 3].imshow(preds[i])
axes[cols * i + 3].set_title('Predicted OD Mask')
axes[cols * i + 4].imshow(np.where(preds[i] > binarizer_threshold, 1, 0))
axes[cols * i + 4].set_title('Binarized OD Mask')
axes[cols * i + 5].imshow(preds[26 + i - 1])
axes[cols * i + 5].set_title('Predicted Cup Mask')
axes[cols * i + 6].imshow(np.where(preds[26 + i] >= binarizer_threshold, 1, 0))
axes[cols * i + 6].set_title('Binarized Cup Mask')
#Original calculate and display cup-to-disc ratio
od_area = np.count_nonzero(od[i])
cup_area = np.count_nonzero(np.where(preds[26 + i] >= binarizer_threshold, 1, 0))
cdr = cup_area / od_area
axes[cols * i + 7].text(0.5, 0.5, f'CDR: {cdr:.2f}', fontsize=12, ha='center', va='center')
axes[cols * i + 7].axis('off')
plt.suptitle(super_title)
print(cdr)
od_area = np.count_nonzero(od[i])
cup_area = np.count_nonzero(binarized_cup_mask)
cdr = cup_area / od_area
cdr_list.append(cdr)
axes[cols * i + 7].text(0.5, 0.5, f'CDR: {cdr:.2f}', fontsize=12, ha='center', va='center')
axes[cols * i + 7].axis('off')
def fit_svm(cdr_list, labels, super_title):
# fit SVM model to CDR values
svm = SVC(kernel='linear')
svm.fit(np.array(cdr_list).reshape(-1, 1), labels)
# plot SVM decision boundary
x_min, x_max = np.array(cdr_list).min() - 0.1, np.array(cdr_list).max() + 0.1
xx = np.linspace(x_min, x_max, 10)
w = svm.coef_[0]
a = -w[0] / w[1]
yy = a * xx - (svm.intercept_[0]) / w[1]
fig, ax = plt.subplots()
ax.scatter(cdr_list, labels)
ax.plot(xx, yy, 'k-')
ax.set_xlabel('CDR')
ax.set_ylabel('Label')
ax.set_title('SVM Decision Boundary')
plt.suptitle(super_title)
plt.show()
# predict labels and calculate accuracy
preds = svm.predict(np.array(cdr_list).reshape(-1, 1))
accuracy = accuracy_score(labels, preds)
# plot accuracy over time
fig, ax = plt.subplots()
ax.plot(np.arange(len(cdr_list)), [accuracy] * len(cdr_list), 'r--', label='Accuracy')
ax.set_xlabel('Sample Index')
ax.set_ylabel('Accuracy')
ax.set_ylim([0, 1])
ax.set_title('Accuracy Over Time')
plt.suptitle(super_title)
plt.show()
return svm '''
File "/var/folders/yf/qfc040rn14b1x3c1ttqqpdfw0000gn/T/ipykernel_13351/4237397563.py", line 45 od_area = np.count_nonzero(od[i]) ^ IndentationError: unexpected indent
plt_preds(test_ds_dice, predict1, super_title = 'Dice Loss Trained on Binarized GT Masks', binarizer_threshold = 0.5)
plt_preds(test_ds_no_dice, predict2, binarizer_threshold = 0.25, super_title = 'Binary Cross Entropy Trained on Default Masks')
def fit_svm(cdr_list, labels, super_title):
# fit SVM model to CDR values
svm = SVC(kernel='linear')
svm.fit(np.array(cdr_list).reshape(-1, 1), labels)
# plot SVM decision boundary
x_min, x_max = np.array(cdr_list).min() - 0.1, np.array(cdr_list).max() + 0.1
xx = np.linspace(x_min, x_max, 10)
w = svm.coef_[0]
a = -w[0] / w[1]
yy = a * xx - (svm.intercept_[0]) / w[1]
fig, ax = plt.subplots()
ax.scatter(cdr_list, labels)
ax.plot(xx, yy, 'k-')
ax.set_xlabel('CDR')
ax.set_ylabel('Label')
ax.set_title('SVM Decision Boundary')
plt.suptitle(super_title)
plt.show()
# predict labels and calculate accuracy
preds = svm.predict(np.array(cdr_list).reshape(-1, 1))
accuracy = accuracy_score(labels, preds)
# plot accuracy over time
fig, ax = plt.subplots()
ax.plot(np.arange(len(cdr_list)), [accuracy] * len(cdr_list), 'r--', label='Accuracy')
ax.set_xlabel('Sample Index')
ax.set_ylabel('Accuracy')
ax.set_ylim([0, 1])
ax.set_title('Accuracy Over Time')
plt.suptitle(super_title)
plt.show()
return svm
import numpy as np
from sklearn.svm import SVC
def plt_preds_cdr_svm(ds, preds, super_title, binarizer_threshold = 0.5):
ds = next(iter(ds))
image, targets = ds
image = image['image']
od = targets['od']
cup = targets['cup']
preds = next(iter(preds))
rows = 5
cols = 8
fig, axes = plt.subplots(rows, cols, figsize = (20, 20))
axes = axes.flatten()
cdr_list = [] # store CDR values in a list
for i in range(rows):
axes[cols * i].imshow(image[i])
axes[cols * i].set_title('Original Image')
axes[cols * i + 1].imshow(od[i])
axes[cols * i + 1].set_title('Original OD Mask')
axes[cols * i + 2].imshow(cup[i])
axes[cols * i + 2].set_title('Original Cup Mask')
axes[cols * i + 3].imshow(preds[i])
axes[cols * i + 3].set_title('Predicted OD Mask')
axes[cols * i + 4].imshow(np.where(preds[i] > binarizer_threshold, 1, 0))
axes[cols * i + 4].set_title('Binarized OD Mask')
axes[cols * i + 5].imshow(preds[26 + i - 1])
axes[cols * i + 5].set_title('Predicted Cup Mask')
binarized_cup_mask = np.where(preds[26 + i] >= binarizer_threshold, 1, 0)
axes[cols * i + 6].imshow(binarized_cup_mask)
axes[cols * i + 6].set_title('Binarized Cup Mask')
# calculate and display cup-to-disc ratio
od_area = np.count_nonzero(od[i])
cup_area = np.count_nonzero(binarized_cup_mask)
cdr = cup_area / od_area
cdr_list.append(cdr)
axes[cols * i + 7].text(0.5, 0.5, f'CDR: {cdr:.2f}', fontsize=12, ha='center', va='center')
axes[cols * i + 7].axis('off')
print(cdr)
--------------------------------------------------------------------------- NameError Traceback (most recent call last) /var/folders/yf/qfc040rn14b1x3c1ttqqpdfw0000gn/T/ipykernel_13351/1539682593.py in <module> 1 for i in range(rows): ----> 2 axes[cols * i].imshow(image[i]) 3 axes[cols * i].set_title('Original Image') 4 axes[cols * i + 1].imshow(od[i]) 5 axes[cols * i + 1].set_title('Original OD Mask') NameError: name 'axes' is not defined
def fit_svm(cdr_list, labels, super_title):
# fit SVM model to CDR values
svm = SVC(kernel='linear')
svm.fit(np.array(cdr_list).reshape(-1, 1), labels)
# plot SVM decision boundary
x_min, x_max = np.array(cdr_list).min() - 0.1, np.array(cdr_list).max() + 0.1
xx = np.linspace(x_min, x_max, 10)
w = svm.coef_[0]
a = -w[0] / w[1]
yy = a * xx - (svm.intercept_[0]) / w[1]
fig, ax = plt.subplots()
ax.scatter(cdr_list, labels)
ax.plot(xx, yy, 'k-')
ax.set_xlabel('CDR')
ax.set_ylabel('Label')
ax.set_title('SVM Decision Boundary')
plt.suptitle(super_title)
plt.show()
# predict labels and calculate accuracy
preds = svm.predict(np.array(cdr_list).reshape(-1, 1))
accuracy = accuracy_score(labels, preds)
# plot accuracy over time
fig, ax = plt.subplots()
ax.plot(np.arange(len(cdr_list)), [accuracy] * len(cdr_list), 'r--', label='Accuracy')
ax.set_xlabel('Sample Index')
ax.set_ylabel('Accuracy')
ax.set_ylim([0, 1])
ax.set_title('Accuracy Over Time')
plt.suptitle(super_title)
plt.show()
return svm
from scipy import signal
import cv2
import sys
import pandas as pd
#import imhandle as imh
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import os
import xlrd
import math
from pylab import*
from scipy import signal
import numpy as np
from matplotlib import pyplot as plt
from skimage.feature import greycomatrix, greycoprops
from skimage import data
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(10,10))
#IMPORT GROUND TRUTH
wb = xlrd.open_workbook("http://localhost:8889/edit/Downloads/archive%20(2)/Training-20211018T055246Z-001/Training/GT/Notching___Image__level_decisions.xlsx")
sheet = wb.sheet_by_index(0)
val = [sheet.col_values(1)[5:],sheet.col_values(8)[5:]]
--------------------------------------------------------------------------- FileNotFoundError Traceback (most recent call last) /var/folders/yf/qfc040rn14b1x3c1ttqqpdfw0000gn/T/ipykernel_13351/2783158373.py in <module> 1 #IMPORT GROUND TRUTH ----> 2 wb = xlrd.open_workbook("http://localhost:8889/edit/Downloads/archive%20(2)/Training-20211018T055246Z-001/Training/GT/Notching___Image__level_decisions.xlsx") 3 sheet = wb.sheet_by_index(0) 4 val = [sheet.col_values(1)[5:],sheet.col_values(8)[5:]] ~/opt/anaconda3/lib/python3.9/site-packages/xlrd/__init__.py in open_workbook(filename, logfile, verbosity, use_mmap, file_contents, encoding_override, formatting_info, on_demand, ragged_rows, ignore_workbook_corruption) 164 """ 165 --> 166 file_format = inspect_format(filename, file_contents) 167 # We have to let unknown file formats pass through here, as some ancient 168 # files that xlrd can parse don't start with the expected signature. ~/opt/anaconda3/lib/python3.9/site-packages/xlrd/__init__.py in inspect_format(path, content) 58 else: 59 path = os.path.expanduser(path) ---> 60 with open(path, "rb") as f: 61 peek = f.read(PEEK_SIZE) 62 FileNotFoundError: [Errno 2] No such file or directory: 'http://localhost:8889/edit/Downloads/archive%20(2)/Training-20211018T055246Z-001/Training/GT/Notching___Image__level_decisions.xlsx'
# Get the coordinates of the optic disc from the segmentation mask
mask = od_mask[..., 0]
disc_coords = np.where(mask == 1)
x_center = np.mean(disc_coords[1])
y_center = np.mean(disc_coords[0])
--------------------------------------------------------------------------- NameError Traceback (most recent call last) /var/folders/yf/qfc040rn14b1x3c1ttqqpdfw0000gn/T/ipykernel_13351/4029157456.py in <module> 1 # Get the coordinates of the optic disc from the segmentation mask ----> 2 mask = od_mask[..., 0] 3 disc_coords = np.where(mask == 1) 4 x_center = np.mean(disc_coords[1]) 5 y_center = np.mean(disc_coords[0]) NameError: name 'od_mask' is not defined